Remove page-scrub lists and async scrubbing.
author    Keir Fraser <keir.fraser@citrix.com>
          Thu, 2 Jul 2009 15:45:31 +0000 (16:45 +0100)
committer Keir Fraser <keir.fraser@citrix.com>
          Thu, 2 Jul 2009 15:45:31 +0000 (16:45 +0100)
The original user of this was domain destruction. Now that domain
destruction is preemptible all the way back up to dom0 userspace,
asynchrony is better introduced at that level, if at all, imo.
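
Net effect on the code (a condensed view taken from the page_alloc.c hunk
below, not verbatim): freed domain pages are now scrubbed inline at free
time rather than queued on page_scrub_list for a softirq to process later.

    /* free_domheap_pages(): scrub each page of a dying domain synchronously. */
    for ( i = 0; i < (1 << order); i++ )
    {
        page_set_owner(&pg[i], NULL);
        scrub_one_page(&pg[i]);  /* was: page_list_add(&pg[i], &page_scrub_list) */
    }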

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/ia64/xen/dom0_ops.c
xen/arch/ia64/xen/domain.c
xen/arch/x86/domain.c
xen/arch/x86/sysctl.c
xen/common/domain.c
xen/common/page_alloc.c
xen/common/tmem_xen.c
xen/include/xen/mm.h

diff --git a/xen/arch/ia64/xen/dom0_ops.c b/xen/arch/ia64/xen/dom0_ops.c
index f27738d42d171a83b4482ebc344cef6751cb5018..37e9b9fefbf8c3ea621f939cb5df614e09ad39af 100644
@@ -718,7 +718,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
         pi->nr_nodes         = num_online_nodes();
         pi->total_pages      = total_pages; 
         pi->free_pages       = avail_domheap_pages();
-        pi->scrub_pages      = avail_scrub_pages();
+        pi->scrub_pages      = 0;
         pi->cpu_khz          = local_cpu_data->proc_freq / 1000;
 
         pi->max_cpu_id = last_cpu(cpu_online_map);
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
index 7dde9bf33a987ca31daa7dbe108ac32b9285f426..c8ba9e28eba4f23272f796b7974e7b077db0bb1b 100644
@@ -360,7 +360,6 @@ static void continue_cpu_idle_loop(void)
 #else
            irq_stat[cpu].idle_timestamp = jiffies;
 #endif
-           page_scrub_schedule_work();
            while ( !softirq_pending(cpu) )
                default_idle();
            raise_softirq(SCHEDULE_SOFTIRQ);
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 48b41a2bdaa64e1f0ab973c6bb751cdf7c68936a..722d518bba68b391263fc3c6167aabda42da5ec1 100644
@@ -120,7 +120,6 @@ void idle_loop(void)
     {
         if ( cpu_is_offline(smp_processor_id()) )
             play_dead();
-        page_scrub_schedule_work();
         (*pm_idle)();
         do_softirq();
     }
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index faf3f5157c71bdd4d9a8ae86e510246862f9f175..651dbcfdd3522fb60e50f6357280303a7a7f93a6 100644
@@ -67,7 +67,7 @@ long arch_do_sysctl(
         pi->nr_nodes = num_online_nodes();
         pi->total_pages = total_pages;
         pi->free_pages = avail_domheap_pages();
-        pi->scrub_pages = avail_scrub_pages();
+        pi->scrub_pages = 0;
         pi->cpu_khz = cpu_khz;
         memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
         if ( hvm_enabled )
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 3b21006d361bfa06c10994bfe8bb186768eb2093..a2507a7b3d34b56cee90f017e9b7d5fcff9cc098 100644
@@ -394,7 +394,6 @@ int domain_kill(struct domain *d)
         /* fallthrough */
     case DOMDYING_dying:
         rc = domain_relinquish_resources(d);
-        page_scrub_kick();
         if ( rc != 0 )
         {
             BUG_ON(rc != -EAGAIN);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index bd514cfeed57ddfeec659491ef04b6d191c4abe4..9147e8a2b14375965b207291efd75c00fb0f1254 100644
@@ -64,18 +64,6 @@ integer_param("dma_bits", dma_bitsize);
 #define round_pgdown(_p)  ((_p)&PAGE_MASK)
 #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
 
-#ifndef NDEBUG
-/* Avoid callers relying on allocations returning zeroed pages. */
-#define scrub_page(p) memset((p), 0xc2, PAGE_SIZE)
-#else
-/* For a production build, clear_page() is the fastest way to scrub. */
-#define scrub_page(p) clear_page(p)
-#endif
-
-static DEFINE_SPINLOCK(page_scrub_lock);
-PAGE_LIST_HEAD(page_scrub_list);
-static unsigned long scrub_pages;
-
 /* Offlined page list, protected by heap_lock. */
 PAGE_LIST_HEAD(page_offlined_list);
 /* Broken page list, protected by heap_lock. */
@@ -945,7 +933,6 @@ void __init end_boot_allocator(void)
  */
 void __init scrub_heap_pages(void)
 {
-    void *p;
     unsigned long mfn;
 
     if ( !opt_bootscrub )
@@ -969,21 +956,7 @@ void __init scrub_heap_pages(void)
 
         /* Re-check page status with lock held. */
         if ( !allocated_in_map(mfn) )
-        {
-            if ( is_xen_heap_mfn(mfn) )
-            {
-                p = page_to_virt(mfn_to_page(mfn));
-                memguard_unguard_range(p, PAGE_SIZE);
-                scrub_page(p);
-                memguard_guard_range(p, PAGE_SIZE);
-            }
-            else
-            {
-                p = map_domain_page(mfn);
-                scrub_page(p);
-                unmap_domain_page(p);
-            }
-        }
+            scrub_one_page(mfn_to_page(mfn));
 
         spin_unlock(&heap_lock);
     }
@@ -1247,10 +1220,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
             for ( i = 0; i < (1 << order); i++ )
             {
                 page_set_owner(&pg[i], NULL);
-                spin_lock(&page_scrub_lock);
-                page_list_add(&pg[i], &page_scrub_list);
-                scrub_pages++;
-                spin_unlock(&page_scrub_lock);
+                scrub_one_page(&pg[i]);
             }
         }
     }
@@ -1322,96 +1292,19 @@ static __init int pagealloc_keyhandler_init(void)
 __initcall(pagealloc_keyhandler_init);
 
 
-
-/*************************
- * PAGE SCRUBBING
- */
-
-static DEFINE_PER_CPU(struct timer, page_scrub_timer);
-
-static void page_scrub_softirq(void)
-{
-    PAGE_LIST_HEAD(list);
-    struct page_info  *pg;
-    void             *p;
-    int               i;
-    s_time_t          start = NOW();
-    static spinlock_t serialise_lock = SPIN_LOCK_UNLOCKED;
-
-    /* free_heap_pages() does not parallelise well. Serialise this function. */
-    if ( !spin_trylock(&serialise_lock) )
-    {
-        set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(1));
-        return;
-    }
-
-    /* Aim to do 1ms of work every 10ms. */
-    do {
-        spin_lock(&page_scrub_lock);
-
-        /* Peel up to 16 pages from the list. */
-        for ( i = 0; i < 16; i++ )
-        {
-            if ( !(pg = page_list_remove_head(&page_scrub_list)) )
-                break;
-            page_list_add_tail(pg, &list);
-        }
-        
-        if ( unlikely(i == 0) )
-        {
-            spin_unlock(&page_scrub_lock);
-            goto out;
-        }
-
-        scrub_pages -= i;
-
-        spin_unlock(&page_scrub_lock);
-
-        /* Scrub each page in turn. */
-        while ( (pg = page_list_remove_head(&list)) ) {
-            p = map_domain_page(page_to_mfn(pg));
-            scrub_page(p);
-            unmap_domain_page(p);
-            free_heap_pages(pg, 0);
-        }
-    } while ( (NOW() - start) < MILLISECS(1) );
-
-    set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(10));
-
- out:
-    spin_unlock(&serialise_lock);
-}
-
-void scrub_list_splice(struct page_list_head *list)
-{
-    spin_lock(&page_scrub_lock);
-    page_list_splice(list, &page_scrub_list);
-    spin_unlock(&page_scrub_lock);
-}
-
-void scrub_list_add(struct page_info *pg)
-{
-    spin_lock(&page_scrub_lock);
-    page_list_add(pg, &page_scrub_list);
-    spin_unlock(&page_scrub_lock);
-}
-
 void scrub_one_page(struct page_info *pg)
 {
     void *p = map_domain_page(page_to_mfn(pg));
 
-    scrub_page(p);
-    unmap_domain_page(p);
-}
-
-static void page_scrub_timer_fn(void *unused)
-{
-    page_scrub_schedule_work();
-}
+#ifndef NDEBUG
+    /* Avoid callers relying on allocations returning zeroed pages. */
+    memset(p, 0xc2, PAGE_SIZE);
+#else
+    /* For a production build, clear_page() is the fastest way to scrub. */
+    clear_page(p);
+#endif
 
-unsigned long avail_scrub_pages(void)
-{
-    return scrub_pages;
+    unmap_domain_page(p);
 }
 
 static void dump_heap(unsigned char key)
@@ -1439,18 +1332,6 @@ static __init int register_heap_trigger(void)
 }
 __initcall(register_heap_trigger);
 
-
-static __init int page_scrub_init(void)
-{
-    int cpu;
-    for_each_cpu ( cpu )
-        init_timer(&per_cpu(page_scrub_timer, cpu),
-                   page_scrub_timer_fn, NULL, cpu);
-    open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
-    return 0;
-}
-__initcall(page_scrub_init);
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index f168535a2a80b8f5c3850e7f8865865db2be609a..9911e802ed9a3cc9458b79f6e6ec1d919009581a 100644
@@ -195,12 +195,14 @@ EXPORT unsigned long tmh_page_list_pages = 0;
 EXPORT void tmh_release_avail_pages_to_host(void)
 {
     spin_lock(&tmh_page_list_lock);
-    if ( !page_list_empty(&tmh_page_list) )
+    while ( !page_list_empty(&tmh_page_list) )
     {
-        scrub_list_splice(&tmh_page_list);
-        INIT_PAGE_LIST_HEAD(&tmh_page_list);
-        tmh_page_list_pages = 0;
+        struct page_info *pg = page_list_remove_head(&tmh_page_list);
+        scrub_one_page(pg);
+        free_domheap_page(pg);
     }
+    INIT_PAGE_LIST_HEAD(&tmh_page_list);
+    tmh_page_list_pages = 0;
     spin_unlock(&tmh_page_list_lock);
 }
 
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 71e3181e2a4f1ec36286f03399b90d02337c2bc4..d00ac0a4bb5147870a5b85ca27ab0e68c8861149 100644
@@ -299,22 +299,7 @@ page_list_splice(struct page_list_head *list, struct page_list_head *head)
 # define page_list_splice(list, hd)        list_splice(list, hd)
 #endif
 
-/* Automatic page scrubbing for dead domains. */
-extern struct page_list_head page_scrub_list;
-#define page_scrub_schedule_work()                 \
-    do {                                           \
-        if ( !page_list_empty(&page_scrub_list) )  \
-            raise_softirq(PAGE_SCRUB_SOFTIRQ);     \
-    } while ( 0 )
-#define page_scrub_kick()                                               \
-    do {                                                                \
-        if ( !page_list_empty(&page_scrub_list) )                       \
-            cpumask_raise_softirq(cpu_online_map, PAGE_SCRUB_SOFTIRQ);  \
-    } while ( 0 )
-void scrub_list_splice(struct page_list_head *);
-void scrub_list_add(struct page_info *);
 void scrub_one_page(struct page_info *);
-unsigned long avail_scrub_pages(void);
 
 int guest_remove_page(struct domain *d, unsigned long gmfn);